perfc_incrc(calls_to_mmu_update);
perfc_addc(num_page_updates, count);
+ perfc_incr_histo(bpt_updates, count, PT_UPDATES);
if ( unlikely(!array_access_ok(VERIFY_READ, ureqs, count, sizeof(req))) )
{
int i, cpu = smp_processor_id();
struct exec_domain *ed = current;
struct domain *d = ed->domain;
+#ifdef PERF_COUNTERS
+ unsigned int modified = 0;
+#endif
l1va = ptwr_info[cpu].ptinfo[which].l1va;
ptep = (unsigned long *)&linear_pg_table[l1_linear_offset(l1va)];
if ( likely(l1_pgentry_val(ol1e) == l1_pgentry_val(nl1e)) )
continue;
+#ifdef PERF_COUNTERS
+ /* Update number of entries modified. */
+ modified++;
+#endif
+
/*
* Fast path for PTEs that have merely been write-protected
* (e.g., during a Unix fork()). A strict reduction in privilege.
}
unmap_domain_mem(pl1e);
+ perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
+
/*
* STEP 3. Reattach the L1 p.t. page into the current address space.
*/
* to extract and format the required data.
*/
+#include <xen/config.h>
#include <xen/sched.h>
#define DEFINE(_sym, _val) \
OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
BLANK();
+/* Offsets into struct perfcounter for the assembly PERFC_INCR() helper. */
+#ifdef PERF_COUNTERS
+ OFFSET(PERFC_hypercalls, struct perfcounter, hypercalls);
+ OFFSET(PERFC_exceptions, struct perfcounter, exceptions);
+ BLANK();
+#endif
+
OFFSET(MULTICALL_op, multicall_entry_t, op);
OFFSET(MULTICALL_arg0, multicall_entry_t, args[0]);
OFFSET(MULTICALL_arg1, multicall_entry_t, args[1]);
SAVE_ALL(b)
sti
GET_CURRENT(%ebx)
- andl $(NR_hypercalls-1),%eax
- call *SYMBOL_NAME(hypercall_table)(,%eax,4)
+ andl $(NR_hypercalls-1),%eax
+ PERFC_INCR(PERFC_hypercalls, %eax)
+ call *SYMBOL_NAME(hypercall_table)(,%eax,4)
movl %eax,XREGS_eax(%esp) # save the return value
test_all_events:
movl %esp,%edx
pushl %edx # push the xen_regs pointer
GET_CURRENT(%ebx)
+ PERFC_INCR(PERFC_exceptions, %eax)
call *SYMBOL_NAME(exception_table)(,%eax,4)
addl $4,%esp
movl XREGS_eflags(%esp),%eax
* to extract and format the required data.
*/
+#include <xen/config.h>
#include <xen/sched.h>
#define DEFINE(_sym, _val) \
OFFSET(TRAPBOUNCE_eip, struct trap_bounce, eip);
BLANK();
+/* Offsets into struct perfcounter for the assembly PERFC_INCR() helper. */
+#ifdef PERF_COUNTERS
+ OFFSET(PERFC_hypercalls, struct perfcounter, hypercalls);
+ OFFSET(PERFC_exceptions, struct perfcounter, exceptions);
+ BLANK();
+#endif
+
OFFSET(MULTICALL_op, multicall_entry_t, op);
OFFSET(MULTICALL_arg0, multicall_entry_t, args[0]);
OFFSET(MULTICALL_arg1, multicall_entry_t, args[1]);
movq %r10,%rcx
andq $(NR_hypercalls-1),%rax
leaq SYMBOL_NAME(hypercall_table)(%rip),%r10
+ PERFC_INCR(PERFC_hypercalls, %rax)
callq *(%r10,%rax,8)
movq %rax,XREGS_rax(%rsp) # save the return value
movl XREGS_entry_vector(%rsp),%eax
leaq SYMBOL_NAME(exception_table)(%rip),%rdx
GET_CURRENT(%rbx)
+ PERFC_INCR(PERFC_exceptions, %rax)
callq *(%rdx,%rax,8)
testb $3,XREGS_cs(%rsp)
jz restore_all_xen
sum += atomic_read(&counters[j]);
printk("TOTAL[%10d] ", sum);
for ( j = 0; j < perfc_info[i].nr_elements; j++ )
+ {
+ if ( (j != 0) && ((j % 4) == 0) )
+ printk("\n ");
printk("ARR%02d[%10d] ", j, atomic_read(&counters[j]));
+ }
counters += j;
break;
}
SET_XEN_SEGMENTS(_reg) \
1:
+/*
+ * PERFC_INCR(_name,_idx): from assembly, atomically bump entry _idx of
+ * the counter array at byte offset _name (generated by asm-offsets.c)
+ * inside 'perfcounters'.  Each counter is a 4-byte slot, hence the *4
+ * index scale; 'lock incl' makes the increment SMP-safe.  Expands to
+ * nothing when PERF_COUNTERS is disabled, so call sites need no guard.
+ */
+#ifdef PERF_COUNTERS
+#define PERFC_INCR(_name,_idx) \
+ lock incl SYMBOL_NAME(perfcounters)+_name(,_idx,4)
+#else
+#define PERFC_INCR(_name,_idx)
+#endif
+
#endif
#define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
popq %rsi; \
popq %rdi;
+/*
+ * PERFC_INCR(_name,_idx): from assembly, atomically bump entry _idx of
+ * the counter array at byte offset _name (generated by asm-offsets.c)
+ * inside 'perfcounters'.  The array base is formed RIP-relatively via
+ * leaq into %rdx, which is saved/restored around the increment; each
+ * counter is a 4-byte slot, hence the *4 index scale, and 'lock incl'
+ * makes the increment SMP-safe.  Expands to nothing when PERF_COUNTERS
+ * is disabled.
+ * NOTE(review): _idx must not itself be %rdx — the leaq clobbers it
+ * before the indexed access; current callers pass %rax/%eax only.
+ */
+#ifdef PERF_COUNTERS
+#define PERFC_INCR(_name,_idx) \
+ pushq %rdx; \
+ leaq SYMBOL_NAME(perfcounters)+_name(%rip),%rdx; \
+ lock incl (%rdx,_idx,4); \
+ popq %rdx;
+#else
+#define PERFC_INCR(_name,_idx)
+#endif
+
#endif
#define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
#define perfc_value(x) atomic_read(&perfcounters.x[0])
#define perfc_valuec(x) atomic_read(&perfcounters.x[smp_processor_id()])
-#define perfc_valuea(x,y) \
- { if(y<(sizeof(perfcounters.x)/sizeof(*perfcounters.x))) \
- atomic_read(&perfcounters.x[y]); }
+/* NOTE(review): the atomic_read() result is discarded — as a statement
+ * macro this only performs a bounds-checked read with no visible effect.
+ * Presumably kept for interface symmetry with perfc_seta/perfc_incra;
+ * confirm no caller expects a value back. */
+#define perfc_valuea(x,y) \
+ do { \
+ if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
+ atomic_read(&perfcounters.x[y]); \
+ } while ( 0 )
#define perfc_set(x,v) atomic_set(&perfcounters.x[0], v)
#define perfc_setc(x,v) atomic_set(&perfcounters.x[smp_processor_id()], v)
-#define perfc_seta(x,y,v) \
- { if(y<(sizeof(perfcounters.x)/sizeof(*perfcounters.x))) \
- atomic_set(&perfcounters.x[y], v); }
+#define perfc_seta(x,y,v) \
+ do { \
+ if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
+ atomic_set(&perfcounters.x[y], v); \
+ } while ( 0 )
#define perfc_incr(x) atomic_inc(&perfcounters.x[0])
#define perfc_decr(x) atomic_dec(&perfcounters.x[0])
#define perfc_incrc(x) atomic_inc(&perfcounters.x[smp_processor_id()])
-#define perfc_incra(x,y) \
- { if(y<(sizeof(perfcounters.x)/sizeof(*perfcounters.x))) \
- atomic_inc(&perfcounters.x[y]); }
+#define perfc_incra(x,y) \
+ do { \
+ if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
+ atomic_inc(&perfcounters.x[y]); \
+ } while ( 0 )
#define perfc_add(x,y) atomic_add((y), &perfcounters.x[0])
#define perfc_addc(x,y) atomic_add((y), &perfcounters.x[smp_processor_id()])
-#define perfc_adda(x,y,z) \
- { if(y<(sizeof(perfcounters.x)/sizeof(*perfcounters.x))) \
- atomic_add((z), &perfcounters.x[y]); }
+#define perfc_adda(x,y,z) \
+ do { \
+ if ( (y) < (sizeof(perfcounters.x) / sizeof(*perfcounters.x)) ) \
+ atomic_add((z), &perfcounters.x[y]); \
+ } while ( 0 )
+/*
+ * Histogram increment: bucket 0 counts _v == 0 and bucket 1 counts
+ * _v == 1 exactly.  Values >= 2 fall into equally sized buckets of
+ * width PERFC_<_n>_BUCKET_SIZE starting at bucket index 2; any value
+ * past the last regular bucket is lumped into the final overflow
+ * bucket at index PERFC_MAX_<_n> - 1.  _n names the constant pair
+ * (e.g. PT_UPDATES -> PERFC_MAX_PT_UPDATES and
+ * PERFC_PT_UPDATES_BUCKET_SIZE) declared alongside the counter array.
+ */
+#define perfc_incr_histo(_x,_v,_n) \
+ do { \
+ if ( (_v) == 0 ) \
+ perfc_incra(_x, 0); \
+ else if ( (_v) == 1 ) \
+ perfc_incra(_x, 1); \
+ else if ( (((_v)-2) / PERFC_ ## _n ## _BUCKET_SIZE) < \
+ (PERFC_MAX_ ## _n - 3) ) \
+ perfc_incra(_x, (((_v)-2) / PERFC_ ## _n ## _BUCKET_SIZE) + 2); \
+ else \
+ perfc_incra(_x, PERFC_MAX_ ## _n - 1); \
+ } while ( 0 )
+
#else /* PERF_COUNTERS */
#define perfc_value(x) (0)
#define perfc_add(x,y) ((void)0)
#define perfc_addc(x,y) ((void)0)
#define perfc_adda(x,y,z) ((void)0)
+#define perfc_incr_histo(x,y,z) ((void)0)
#endif /* PERF_COUNTERS */
PERFCOUNTER_CPU( check_pagetable, "calls to check_pagetable" )
PERFCOUNTER_CPU( check_all_pagetables, "calls to check_all_pagetables" )
+/* Page-table-update histograms: bucket layout is interpreted by
+ * perfc_incr_histo() — PERFC_MAX_PT_UPDATES buckets in total, regular
+ * buckets PERFC_PT_UPDATES_BUCKET_SIZE wide (buckets 0 and 1 are the
+ * exact 0/1 counts, the last bucket absorbs the overflow). */
+#define PERFC_MAX_PT_UPDATES 64
+#define PERFC_PT_UPDATES_BUCKET_SIZE 3
+PERFCOUNTER_ARRAY( wpt_updates, "writable pt updates", PERFC_MAX_PT_UPDATES )
+PERFCOUNTER_ARRAY( bpt_updates, "batched pt updates", PERFC_MAX_PT_UPDATES )
+
+/* One counter per hypercall number / exception vector, bumped from
+ * entry.S via PERFC_INCR using the asm-offsets PERFC_* offsets. */
+PERFCOUNTER_ARRAY( hypercalls, "hypercalls", NR_hypercalls )
+PERFCOUNTER_ARRAY( exceptions, "exceptions", 32 )
+
#define VMX_PERF_EXIT_REASON_SIZE 37
#define VMX_PERF_VECTOR_SIZE 0x20
-PERFCOUNTER_ARRAY(vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE )
-PERFCOUNTER_ARRAY(cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE )
+PERFCOUNTER_ARRAY( vmexits, "vmexits", VMX_PERF_EXIT_REASON_SIZE )
+PERFCOUNTER_ARRAY( cause_vector, "cause vector", VMX_PERF_VECTOR_SIZE )